From: Keir Fraser Date: Thu, 2 Jul 2009 15:45:31 +0000 (+0100) Subject: Remove page-scrub lists and async scrubbing. X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~13655 X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22Dat/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22Dat?a=commitdiff_plain;h=428dc303f59bf749bd9437e44db1f54d12bdb5bc;p=xen.git Remove page-scrub lists and async scrubbing. The original user for this was domain destruction. Now that this is preemptible all the way back up to dom0 userspace, asynchrony is better introduced at that level, if at all, imo. Signed-off-by: Keir Fraser --- diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c index f27738d42d..37e9b9fefb 100644 --- a/xen/arch/ia64/xen/dom0_ops.c +++ b/xen/arch/ia64/xen/dom0_ops.c @@ -718,7 +718,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl) pi->nr_nodes = num_online_nodes(); pi->total_pages = total_pages; pi->free_pages = avail_domheap_pages(); - pi->scrub_pages = avail_scrub_pages(); + pi->scrub_pages = 0; pi->cpu_khz = local_cpu_data->proc_freq / 1000; pi->max_cpu_id = last_cpu(cpu_online_map); diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c index 7dde9bf33a..c8ba9e28eb 100644 --- a/xen/arch/ia64/xen/domain.c +++ b/xen/arch/ia64/xen/domain.c @@ -360,7 +360,6 @@ static void continue_cpu_idle_loop(void) #else irq_stat[cpu].idle_timestamp = jiffies; #endif - page_scrub_schedule_work(); while ( !softirq_pending(cpu) ) default_idle(); raise_softirq(SCHEDULE_SOFTIRQ); diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 48b41a2bda..722d518bba 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -120,7 +120,6 @@ void idle_loop(void) { if ( cpu_is_offline(smp_processor_id()) ) play_dead(); - page_scrub_schedule_work(); (*pm_idle)(); do_softirq(); } diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c index faf3f5157c..651dbcfdd3 100--- 
a/xen/arch/x86/sysctl.c +++ b/xen/arch/x86/sysctl.c @@ -67,7 +67,7 @@ long arch_do_sysctl( pi->nr_nodes = num_online_nodes(); pi->total_pages = total_pages; pi->free_pages = avail_domheap_pages(); - pi->scrub_pages = avail_scrub_pages(); + pi->scrub_pages = 0; pi->cpu_khz = cpu_khz; memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4); if ( hvm_enabled ) diff --git a/xen/common/domain.c b/xen/common/domain.c index 3b21006d36..a2507a7b3d 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -394,7 +394,6 @@ int domain_kill(struct domain *d) /* fallthrough */ case DOMDYING_dying: rc = domain_relinquish_resources(d); - page_scrub_kick(); if ( rc != 0 ) { BUG_ON(rc != -EAGAIN); diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index bd514cfeed..9147e8a2b1 100644 --- a/xen/common/page_alloc.c +++ b/xen/common/page_alloc.c @@ -64,18 +64,6 @@ integer_param("dma_bits", dma_bitsize); #define round_pgdown(_p) ((_p)&PAGE_MASK) #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK) -#ifndef NDEBUG -/* Avoid callers relying on allocations returning zeroed pages. */ -#define scrub_page(p) memset((p), 0xc2, PAGE_SIZE) -#else -/* For a production build, clear_page() is the fastest way to scrub. */ -#define scrub_page(p) clear_page(p) -#endif - -static DEFINE_SPINLOCK(page_scrub_lock); -PAGE_LIST_HEAD(page_scrub_list); -static unsigned long scrub_pages; - /* Offlined page list, protected by heap_lock. */ PAGE_LIST_HEAD(page_offlined_list); /* Broken page list, protected by heap_lock. */ @@ -945,7 +933,6 @@ void __init end_boot_allocator(void) */ void __init scrub_heap_pages(void) { - void *p; unsigned long mfn; if ( !opt_bootscrub ) @@ -969,21 +956,7 @@ void __init scrub_heap_pages(void) /* Re-check page status with lock held. 
*/ if ( !allocated_in_map(mfn) ) - { - if ( is_xen_heap_mfn(mfn) ) - { - p = page_to_virt(mfn_to_page(mfn)); - memguard_unguard_range(p, PAGE_SIZE); - scrub_page(p); - memguard_guard_range(p, PAGE_SIZE); - } - else - { - p = map_domain_page(mfn); - scrub_page(p); - unmap_domain_page(p); - } - } + scrub_one_page(mfn_to_page(mfn)); spin_unlock(&heap_lock); } @@ -1247,10 +1220,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order) for ( i = 0; i < (1 << order); i++ ) { page_set_owner(&pg[i], NULL); - spin_lock(&page_scrub_lock); - page_list_add(&pg[i], &page_scrub_list); - scrub_pages++; - spin_unlock(&page_scrub_lock); + scrub_one_page(&pg[i]); } } } @@ -1322,96 +1292,19 @@ static __init int pagealloc_keyhandler_init(void) __initcall(pagealloc_keyhandler_init); - -/************************* - * PAGE SCRUBBING - */ - -static DEFINE_PER_CPU(struct timer, page_scrub_timer); - -static void page_scrub_softirq(void) -{ - PAGE_LIST_HEAD(list); - struct page_info *pg; - void *p; - int i; - s_time_t start = NOW(); - static spinlock_t serialise_lock = SPIN_LOCK_UNLOCKED; - - /* free_heap_pages() does not parallelise well. Serialise this function. */ - if ( !spin_trylock(&serialise_lock) ) - { - set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(1)); - return; - } - - /* Aim to do 1ms of work every 10ms. */ - do { - spin_lock(&page_scrub_lock); - - /* Peel up to 16 pages from the list. */ - for ( i = 0; i < 16; i++ ) - { - if ( !(pg = page_list_remove_head(&page_scrub_list)) ) - break; - page_list_add_tail(pg, &list); - } - - if ( unlikely(i == 0) ) - { - spin_unlock(&page_scrub_lock); - goto out; - } - - scrub_pages -= i; - - spin_unlock(&page_scrub_lock); - - /* Scrub each page in turn. 
*/ - while ( (pg = page_list_remove_head(&list)) ) { - p = map_domain_page(page_to_mfn(pg)); - scrub_page(p); - unmap_domain_page(p); - free_heap_pages(pg, 0); - } - } while ( (NOW() - start) < MILLISECS(1) ); - - set_timer(&this_cpu(page_scrub_timer), NOW() + MILLISECS(10)); - - out: - spin_unlock(&serialise_lock); -} - -void scrub_list_splice(struct page_list_head *list) -{ - spin_lock(&page_scrub_lock); - page_list_splice(list, &page_scrub_list); - spin_unlock(&page_scrub_lock); -} - -void scrub_list_add(struct page_info *pg) -{ - spin_lock(&page_scrub_lock); - page_list_add(pg, &page_scrub_list); - spin_unlock(&page_scrub_lock); -} - void scrub_one_page(struct page_info *pg) { void *p = map_domain_page(page_to_mfn(pg)); - scrub_page(p); - unmap_domain_page(p); -} - -static void page_scrub_timer_fn(void *unused) -{ - page_scrub_schedule_work(); -} +#ifndef NDEBUG + /* Avoid callers relying on allocations returning zeroed pages. */ + memset(p, 0xc2, PAGE_SIZE); +#else + /* For a production build, clear_page() is the fastest way to scrub. 
*/ + clear_page(p); +#endif -unsigned long avail_scrub_pages(void) -{ - return scrub_pages; + unmap_domain_page(p); } static void dump_heap(unsigned char key) @@ -1439,18 +1332,6 @@ static __init int register_heap_trigger(void) } __initcall(register_heap_trigger); - -static __init int page_scrub_init(void) -{ - int cpu; - for_each_cpu ( cpu ) - init_timer(&per_cpu(page_scrub_timer, cpu), - page_scrub_timer_fn, NULL, cpu); - open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq); - return 0; -} -__initcall(page_scrub_init); - /* * Local variables: * mode: C diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index f168535a2a..9911e802ed 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -195,12 +195,14 @@ EXPORT unsigned long tmh_page_list_pages = 0; EXPORT void tmh_release_avail_pages_to_host(void) { spin_lock(&tmh_page_list_lock); - if ( !page_list_empty(&tmh_page_list) ) + while ( !page_list_empty(&tmh_page_list) ) { - scrub_list_splice(&tmh_page_list); - INIT_PAGE_LIST_HEAD(&tmh_page_list); - tmh_page_list_pages = 0; + struct page_info *pg = page_list_first(&tmh_page_list); + scrub_one_page(pg); + free_domheap_page(pg); } + INIT_PAGE_LIST_HEAD(&tmh_page_list); + tmh_page_list_pages = 0; spin_unlock(&tmh_page_list_lock); } diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index 71e3181e2a..d00ac0a4bb 100644 --- a/xen/include/xen/mm.h +++ b/xen/include/xen/mm.h @@ -299,22 +299,7 @@ page_list_splice(struct page_list_head *list, struct page_list_head *head) # define page_list_splice(list, hd) list_splice(list, hd) #endif -/* Automatic page scrubbing for dead domains. 
*/ -extern struct page_list_head page_scrub_list; -#define page_scrub_schedule_work() \ - do { \ - if ( !page_list_empty(&page_scrub_list) ) \ - raise_softirq(PAGE_SCRUB_SOFTIRQ); \ - } while ( 0 ) -#define page_scrub_kick() \ - do { \ - if ( !page_list_empty(&page_scrub_list) ) \ - cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ); \ - } while ( 0 ) -void scrub_list_splice(struct page_list_head *); -void scrub_list_add(struct page_info *); void scrub_one_page(struct page_info *); -unsigned long avail_scrub_pages(void); int guest_remove_page(struct domain *d, unsigned long gmfn);